[IA64] EFI mapping: restoring mapping correctly.
author: Isaku Yamahata <yamahata@valinux.co.jp>
Mon, 25 Aug 2008 10:04:37 +0000 (19:04 +0900)
committer: Isaku Yamahata <yamahata@valinux.co.jp>
Mon, 25 Aug 2008 10:04:37 +0000 (19:04 +0900)
When switching back from the EFI mapping, correctly switch back
depending on the current vcpu type.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_vcpu.c
xen/arch/ia64/xen/regionreg.c
xen/include/asm-ia64/linux-xen/linux/efi.h
xen/include/asm-ia64/regionreg.h
xen/include/asm-ia64/vmx_vcpu.h

index 7f95eeced08b76b4ef695a629070e3c8b101e26d..5d347719604d9e589d9829fb42b065655a495297 100644 (file)
@@ -169,9 +169,7 @@ vmx_load_all_rr(VCPU *vcpu)
        ia64_dv_serialize_data();
        ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
        ia64_dv_serialize_data();
-       vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
-                      (void *)vcpu->arch.vhpt.hash,
-                      vcpu->arch.privregs);
+       vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
        ia64_set_pta(VMX(vcpu, mpta));
        vmx_ia64_set_dcr(vcpu);
 
index 541c7205c426da7f6e8dc39f01a261dc5a876350..aaa513c5f98f9ea0bcacfac7f2616530999c955f 100644 (file)
@@ -196,13 +196,17 @@ void vmx_vcpu_set_rr_fast(VCPU *vcpu, u64 reg, u64 val)
     }
 }
 
-void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
-                    void *shared_arch_info)
+void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
 {
-    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
-    __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
-    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
-    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
+    __vmx_switch_rr7(rid, (void *)v->arch.vhpt.hash, v->arch.privregs);
+}
+
+void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid)
+{
+    __get_cpu_var(inserted_vhpt) = (unsigned long)v->arch.vhpt.hash;
+    __get_cpu_var(inserted_vpd) = (unsigned long)v->arch.privregs;
+    __get_cpu_var(inserted_mapped_regs) = (unsigned long)v->arch.privregs;
+    __vmx_switch_rr7_vcpu(v, rid);
 }
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
@@ -218,8 +222,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
     switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         if (likely(vcpu == current))
-            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
-                           vcpu->arch.privregs);
+            vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, val));
        break;
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
index d601b8e2141a550a3053aa4f2fdae97b4f4f3c58..f2ef96c06060f8c73d7c25efec8d7fe653ec8daf 100644 (file)
 #include <asm/vcpu.h>
 #include <asm/percpu.h>
 #include <asm/pal.h>
+#include <asm/vmx_vcpu.h>
 
 /* Defined in xemasm.S  */
-extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
+extern void ia64_new_rr7(unsigned long rid, void *shared_info,
+                        void *shared_arch_info, unsigned long shared_info_va,
+                        unsigned long va_vhpt);
 extern void ia64_new_rr7_efi(unsigned long rid, unsigned long repin_percpu,
                             unsigned long vpd);
 
@@ -239,6 +242,14 @@ set_rr(unsigned long rr, unsigned long rrval)
        ia64_srlz_d();
 }
 
+static inline void
+ia64_new_rr7_vcpu(struct vcpu *v, unsigned long rid)
+{
+       ia64_new_rr7(rid, v->domain->shared_info,
+                    v->arch.privregs, v->domain->arch.shared_info_va,
+                    __va_ul(vcpu_vhpt_maddr(v)));
+}
+
 // validates and changes a single region register
 // in the currently executing domain
 // Passing a value of -1 is a (successful) no-op
@@ -282,9 +293,7 @@ int set_one_rr(unsigned long rr, unsigned long val)
                __get_cpu_var(inserted_mapped_regs) =
                                        v->domain->arch.shared_info_va +
                                        XMAPPEDREGS_OFS;
-               ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
-                            v->arch.privregs, v->domain->arch.shared_info_va,
-                            __va_ul(vcpu_vhpt_maddr(v)));
+               ia64_new_rr7_vcpu(v, vmMangleRID(newrrv.rrval));
        } else {
                set_rr(rr,newrrv.rrval);
        }
@@ -312,6 +321,31 @@ int set_one_rr_efi(unsigned long rr, unsigned long val)
        return 1;
 }
 
+void
+set_one_rr_efi_restore(unsigned long rr, unsigned long val)
+{
+       unsigned long rreg = REGION_NUMBER(rr);
+       
+       BUG_ON(rreg != 6 && rreg != 7);
+
+       if (rreg == 6) {
+               ia64_set_rr(rr, val);
+               ia64_srlz_d();
+       } else {
+               /* firmware call is done very early before struct vcpu
+                  and struct domain are initialized. */
+               if (unlikely(current == NULL || current->domain == NULL ||
+                            is_idle_vcpu(current)))
+                       ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
+                                                       percpu_set),
+                                        0UL);
+               else if (VMX_DOMAIN(current))
+                       __vmx_switch_rr7_vcpu(current, val);
+               else
+                       ia64_new_rr7_vcpu(current, val);
+       }
+}
+
 void set_virtual_rr0(void)
 {
        struct vcpu *v = current;
index 1cbc86f02b369cf858efcb9e3ae416b4c0ab29ff..e96cb6b19411ff866e6818fd9961713d7e6089b9 100644 (file)
@@ -487,8 +487,8 @@ struct efi_generic_dev_path {
 #define XEN_EFI_RR_LEAVE(rr6, rr7) do {                        \
        if (rr7 != XEN_EFI_RR) {                        \
                efi_unmap_pal_code();                   \
-               set_one_rr_efi(6UL << 61, rr6);         \
-               set_one_rr_efi(7UL << 61, rr7);         \
+               set_one_rr_efi_restore(6UL << 61, rr6); \
+               set_one_rr_efi_restore(7UL << 61, rr7); \
        }                                               \
 } while (0)
 
index 9d9f21f741b85c9e72a977b497b30290633d025d..efd1732622d8be5ad3b2399500aaa67b468c4d1a 100644 (file)
@@ -46,6 +46,7 @@ extern cpumask_t percpu_set;
 
 int set_one_rr(unsigned long rr, unsigned long val);
 int set_one_rr_efi(unsigned long rr, unsigned long val);
+void set_one_rr_efi_restore(unsigned long rr, unsigned long val);
 
 // This function is purely for performance... apparently scrambling
 //  bits in the region id makes for better hashing, which means better
index 635542b930835dcf6adf16132a670302e36301f7..750e6a86ff6eaf1467458ebba19d0a0f2d676a80 100644 (file)
@@ -105,8 +105,8 @@ extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
 extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                              void *shared_arch_info);
-extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
-                           void *shared_arch_info);
+extern void __vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
+extern void vmx_switch_rr7_vcpu(struct vcpu *v, unsigned long rid);
 extern void vmx_ia64_set_dcr(VCPU * v);
 extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
 extern void vmx_asm_bsw0(void);